// src/gpu/command_buffer/service/in_process_command_buffer.cc
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/in_process_command_buffer.h"

#include <queue>
#include <utility>

#include <GLES2/gl2.h>
#ifndef GL_GLEXT_PROTOTYPES
#define GL_GLEXT_PROTOTYPES 1
#endif
#include <GLES2/gl2ext.h>
#include <GLES2/gl2extchromium.h>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/sequence_checker.h"
#include "base/threading/thread.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gpu_control_service.h"
#include "gpu/command_buffer/service/gpu_scheduler.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "ui/gfx/size.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_share_group.h"

#if defined(OS_ANDROID)
#include "gpu/command_buffer/service/stream_texture_manager_in_process_android.h"
#include "ui/gl/android/surface_texture.h"
#endif

namespace gpu {

namespace {

static base::LazyInstance<std::set<InProcessCommandBuffer*> >
    g_all_shared_contexts = LAZY_INSTANCE_INITIALIZER;

static bool g_use_virtualized_gl_context = false;
static bool g_uses_explicit_scheduling = false;
static GpuMemoryBufferFactory* g_gpu_memory_buffer_factory = NULL;

template <typename T>
static void RunTaskWithResult(base::Callback<T(void)> task,
                              T* result,
                              base::WaitableEvent* completion) {
  *result = task.Run();
  completion->Signal();
}
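
// A minimal sketch of how RunTaskWithResult is used for blocking
// cross-thread calls (the same pattern appears in Initialize() and
// Destroy() below). |DoGpuWork| is a hypothetical task used purely for
// illustration:
//
//   bool result = false;
//   base::WaitableEvent completion(true /* manual reset */,
//                                  false /* initially unsignaled */);
//   base::Callback<bool(void)> task = base::Bind(&DoGpuWork);
//   QueueTask(
//       base::Bind(&RunTaskWithResult<bool>, task, &result, &completion));
//   completion.Wait();  // |result| is valid once this returns.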

class GpuInProcessThread
    : public base::Thread,
      public base::RefCountedThreadSafe<GpuInProcessThread> {
 public:
  GpuInProcessThread();

 private:
  friend class base::RefCountedThreadSafe<GpuInProcessThread>;
  virtual ~GpuInProcessThread();

  DISALLOW_COPY_AND_ASSIGN(GpuInProcessThread);
};

GpuInProcessThread::GpuInProcessThread() : base::Thread("GpuThread") {
  Start();
}

GpuInProcessThread::~GpuInProcessThread() {
  Stop();
}

// Used with explicit scheduling when there is no dedicated GPU thread.
class GpuCommandQueue {
 public:
  GpuCommandQueue();
  ~GpuCommandQueue();

  void QueueTask(const base::Closure& task);
  void RunTasks();
  void SetScheduleCallback(const base::Closure& callback);

 private:
  base::Lock tasks_lock_;
  std::queue<base::Closure> tasks_;
  base::Closure schedule_callback_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandQueue);
};

GpuCommandQueue::GpuCommandQueue() {}

GpuCommandQueue::~GpuCommandQueue() {
  base::AutoLock lock(tasks_lock_);
  DCHECK(tasks_.empty());
}

void GpuCommandQueue::QueueTask(const base::Closure& task) {
  {
    base::AutoLock lock(tasks_lock_);
    tasks_.push(task);
  }

  DCHECK(!schedule_callback_.is_null());
  schedule_callback_.Run();
}

void GpuCommandQueue::RunTasks() {
  size_t num_tasks;
  {
    base::AutoLock lock(tasks_lock_);
    num_tasks = tasks_.size();
  }

  while (num_tasks) {
    base::Closure task;
    {
      base::AutoLock lock(tasks_lock_);
      task = tasks_.front();
      tasks_.pop();
      num_tasks = tasks_.size();
    }

    task.Run();
  }
}

void GpuCommandQueue::SetScheduleCallback(const base::Closure& callback) {
  DCHECK(schedule_callback_.is_null());
  schedule_callback_ = callback;
}
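
// A minimal sketch of the explicit-scheduling flow, assuming a hypothetical
// |Embedder| that owns the message loop: QueueTask() only enqueues work and
// runs |schedule_callback_| to request a pump; the embedder later drains the
// queue on whatever thread it uses for GL work.
//
//   g_gpu_queue.Get().SetScheduleCallback(
//       base::Bind(&Embedder::RequestGpuPump, base::Unretained(embedder)));
//   ...
//   // Later, when the embedder decides to pump:
//   g_gpu_queue.Get().RunTasks();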

static base::LazyInstance<GpuCommandQueue> g_gpu_queue =
    LAZY_INSTANCE_INITIALIZER;

class SchedulerClientBase : public InProcessCommandBuffer::SchedulerClient {
 public:
  explicit SchedulerClientBase(bool need_thread);
  virtual ~SchedulerClientBase();

  static bool HasClients();

 protected:
  scoped_refptr<GpuInProcessThread> thread_;

 private:
  static base::LazyInstance<std::set<SchedulerClientBase*> > all_clients_;
  static base::LazyInstance<base::Lock> all_clients_lock_;
};

base::LazyInstance<std::set<SchedulerClientBase*> >
    SchedulerClientBase::all_clients_ = LAZY_INSTANCE_INITIALIZER;
base::LazyInstance<base::Lock> SchedulerClientBase::all_clients_lock_ =
    LAZY_INSTANCE_INITIALIZER;
SchedulerClientBase::SchedulerClientBase(bool need_thread) {
  base::AutoLock lock(all_clients_lock_.Get());
  if (need_thread) {
    if (!all_clients_.Get().empty()) {
      SchedulerClientBase* other = *all_clients_.Get().begin();
      thread_ = other->thread_;
      DCHECK(thread_.get());
    } else {
      thread_ = new GpuInProcessThread;
    }
  }
  all_clients_.Get().insert(this);
}

SchedulerClientBase::~SchedulerClientBase() {
  base::AutoLock lock(all_clients_lock_.Get());
  all_clients_.Get().erase(this);
}

bool SchedulerClientBase::HasClients() {
  base::AutoLock lock(all_clients_lock_.Get());
  return !all_clients_.Get().empty();
}

// A client that talks to the GPU thread.
class ThreadClient : public SchedulerClientBase {
 public:
  ThreadClient();
  virtual void QueueTask(const base::Closure& task) OVERRIDE;
  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
};

ThreadClient::ThreadClient() : SchedulerClientBase(true) {
  DCHECK(thread_.get());
}

void ThreadClient::QueueTask(const base::Closure& task) {
  thread_->message_loop()->PostTask(FROM_HERE, task);
}

void ThreadClient::ScheduleIdleWork(const base::Closure& callback) {
  thread_->message_loop()->PostDelayedTask(
      FROM_HERE, callback, base::TimeDelta::FromMilliseconds(5));
}

// A client that talks to the GpuCommandQueue.
class QueueClient : public SchedulerClientBase {
 public:
  QueueClient();
  virtual void QueueTask(const base::Closure& task) OVERRIDE;
  virtual void ScheduleIdleWork(const base::Closure& callback) OVERRIDE;
};

QueueClient::QueueClient() : SchedulerClientBase(false) {
  DCHECK(!thread_.get());
}

void QueueClient::QueueTask(const base::Closure& task) {
  g_gpu_queue.Get().QueueTask(task);
}

void QueueClient::ScheduleIdleWork(const base::Closure& callback) {
  // TODO(sievers): Should this do anything?
}

static scoped_ptr<InProcessCommandBuffer::SchedulerClient>
CreateSchedulerClient() {
  scoped_ptr<InProcessCommandBuffer::SchedulerClient> client;
  if (g_uses_explicit_scheduling)
    client.reset(new QueueClient);
  else
    client.reset(new ThreadClient);

  return client.Pass();
}

class ScopedEvent {
 public:
  explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {}
  ~ScopedEvent() { event_->Signal(); }

 private:
  base::WaitableEvent* event_;
};

}  // anonymous namespace

InProcessCommandBuffer::InProcessCommandBuffer()
    : context_lost_(false),
      share_group_id_(0),
      last_put_offset_(-1),
      supports_gpu_memory_buffer_(false),
      flush_event_(false, false),
      queue_(CreateSchedulerClient()),
      gpu_thread_weak_ptr_factory_(this) {}

InProcessCommandBuffer::~InProcessCommandBuffer() {
  Destroy();
}

bool InProcessCommandBuffer::IsContextLost() {
  CheckSequencedThread();
  if (context_lost_ || !command_buffer_) {
    return true;
  }
  CommandBuffer::State state = GetState();
  return error::IsError(state.error);
}

void InProcessCommandBuffer::OnResizeView(gfx::Size size, float scale_factor) {
  CheckSequencedThread();
  DCHECK(!surface_->IsOffscreen());
  surface_->Resize(size);
}

bool InProcessCommandBuffer::MakeCurrent() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!context_lost_ && decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(gpu::error::kLostContext);
  return false;
}

void InProcessCommandBuffer::PumpCommands() {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();

  if (!MakeCurrent())
    return;

  gpu_scheduler_->PutChanged();
}

bool InProcessCommandBuffer::GetBufferChanged(int32 transfer_buffer_id) {
  CheckSequencedThread();
  command_buffer_lock_.AssertAcquired();
  command_buffer_->SetGetBuffer(transfer_buffer_id);
  return true;
}

bool InProcessCommandBuffer::Initialize(
    scoped_refptr<gfx::GLSurface> surface,
    bool is_offscreen,
    bool share_resources,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference,
    const base::Closure& context_lost_callback,
    unsigned int share_group_id) {
  share_resources_ = share_resources;
  context_lost_callback_ = WrapCallback(context_lost_callback);
  share_group_id_ = share_group_id;

  if (surface) {
    // The GPU thread must be the same as the client thread because GLSurface
    // is not thread-safe.
    sequence_checker_.reset(new base::SequenceChecker);
    surface_ = surface;
  }

  base::Callback<bool(void)> init_task =
      base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread,
                 base::Unretained(this),
                 is_offscreen,
                 window,
                 size,
                 attribs,
                 gpu_preference);

  base::WaitableEvent completion(true, false);
  bool result = false;
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion));
  completion.Wait();
  return result;
}
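
// A minimal sketch of a client creating an offscreen context, under assumed
// parameter values (the lost-context handler |OnContextLostCallback| is
// hypothetical):
//
//   InProcessCommandBuffer cb;
//   std::vector<int32> attribs;  // empty: default attributes
//   bool ok = cb.Initialize(NULL,   // let it create an offscreen surface
//                           true,   // is_offscreen
//                           false,  // share_resources
//                           gfx::kNullAcceleratedWidget,
//                           gfx::Size(1, 1),
//                           attribs,
//                           gfx::PreferDiscreteGpu,
//                           base::Bind(&OnContextLostCallback),
//                           0);     // share_group_id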

bool InProcessCommandBuffer::InitializeOnGpuThread(
    bool is_offscreen,
    gfx::AcceleratedWidget window,
    const gfx::Size& size,
    const std::vector<int32>& attribs,
    gfx::GpuPreference gpu_preference) {
  CheckSequencedThread();
  gpu_thread_weak_ptr_ = gpu_thread_weak_ptr_factory_.GetWeakPtr();
  // Use one share group for all contexts.
  CR_DEFINE_STATIC_LOCAL(scoped_refptr<gfx::GLShareGroup>, share_group,
                         (new gfx::GLShareGroup));

  DCHECK(size.width() >= 0 && size.height() >= 0);

  TransferBufferManager* manager = new TransferBufferManager();
  transfer_buffer_manager_.reset(manager);
  manager->Initialize();

  scoped_ptr<CommandBufferService> command_buffer(
      new CommandBufferService(transfer_buffer_manager_.get()));
  command_buffer->SetPutOffsetChangeCallback(base::Bind(
      &InProcessCommandBuffer::PumpCommands, gpu_thread_weak_ptr_));
  command_buffer->SetParseErrorCallback(base::Bind(
      &InProcessCommandBuffer::OnContextLost, gpu_thread_weak_ptr_));

  if (!command_buffer->Initialize()) {
    LOG(ERROR) << "Could not initialize command buffer.";
    DestroyOnGpuThread();
    return false;
  }

  InProcessCommandBuffer* context_group = NULL;

  if (share_resources_ && !g_all_shared_contexts.Get().empty()) {
    DCHECK(share_group_id_);
    for (std::set<InProcessCommandBuffer*>::iterator it =
             g_all_shared_contexts.Get().begin();
         it != g_all_shared_contexts.Get().end();
         ++it) {
      if ((*it)->share_group_id_ == share_group_id_) {
        context_group = *it;
        DCHECK(context_group->share_resources_);
        context_lost_ = context_group->IsContextLost();
        break;
      }
    }
    if (!context_group)
      share_group = new gfx::GLShareGroup;
  }

  StreamTextureManager* stream_texture_manager = NULL;
#if defined(OS_ANDROID)
  stream_texture_manager = stream_texture_manager_ =
      context_group ? context_group->stream_texture_manager_.get()
                    : new StreamTextureManagerInProcess;
#endif

  bool bind_generates_resource = false;
  decoder_.reset(gles2::GLES2Decoder::Create(
      context_group ? context_group->decoder_->GetContextGroup()
                    : new gles2::ContextGroup(NULL,
                                              NULL,
                                              NULL,
                                              stream_texture_manager,
                                              bind_generates_resource)));

  gpu_scheduler_.reset(
      new GpuScheduler(command_buffer.get(), decoder_.get(), decoder_.get()));
  command_buffer->SetGetBufferChangeCallback(base::Bind(
      &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get())));
  command_buffer_ = command_buffer.Pass();

  decoder_->set_engine(gpu_scheduler_.get());

  if (!surface_) {
    if (is_offscreen)
      surface_ = gfx::GLSurface::CreateOffscreenGLSurface(size);
    else
      surface_ = gfx::GLSurface::CreateViewGLSurface(window);
  }

  if (!surface_.get()) {
    LOG(ERROR) << "Could not create GLSurface.";
    DestroyOnGpuThread();
    return false;
  }

  if (g_use_virtualized_gl_context) {
    context_ = share_group->GetSharedContext();
    if (!context_.get()) {
      context_ = gfx::GLContext::CreateGLContext(
          share_group.get(), surface_.get(), gpu_preference);
      share_group->SetSharedContext(context_.get());
    }

    context_ = new GLContextVirtual(
        share_group.get(), context_.get(), decoder_->AsWeakPtr());
    if (context_->Initialize(surface_.get(), gpu_preference)) {
      VLOG(1) << "Created virtual GL context.";
    } else {
      context_ = NULL;
    }
  } else {
    context_ = gfx::GLContext::CreateGLContext(
        share_group.get(), surface_.get(), gpu_preference);
  }

  if (!context_.get()) {
    LOG(ERROR) << "Could not create GLContext.";
    DestroyOnGpuThread();
    return false;
  }

  if (!context_->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Could not make context current.";
    DestroyOnGpuThread();
    return false;
  }

  gles2::DisallowedFeatures disallowed_features;
  disallowed_features.swap_buffer_complete_callback = true;
  disallowed_features.gpu_memory_manager = true;
  if (!decoder_->Initialize(surface_,
                            context_,
                            is_offscreen,
                            size,
                            disallowed_features,
                            attribs)) {
    LOG(ERROR) << "Could not initialize decoder.";
    DestroyOnGpuThread();
    return false;
  }

  gpu_control_.reset(
      new GpuControlService(decoder_->GetContextGroup()->image_manager(),
                            g_gpu_memory_buffer_factory,
                            decoder_->GetContextGroup()->mailbox_manager(),
                            decoder_->GetQueryManager()));
  supports_gpu_memory_buffer_ = gpu_control_->SupportsGpuMemoryBuffer();

  if (!is_offscreen) {
    decoder_->SetResizeCallback(base::Bind(
        &InProcessCommandBuffer::OnResizeView, gpu_thread_weak_ptr_));
  }

  if (share_resources_) {
    g_all_shared_contexts.Pointer()->insert(this);
  }

  return true;
}

void InProcessCommandBuffer::Destroy() {
  CheckSequencedThread();

  base::WaitableEvent completion(true, false);
  bool result = false;
  base::Callback<bool(void)> destroy_task = base::Bind(
      &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this));
  QueueTask(
      base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion));
  completion.Wait();
}

bool InProcessCommandBuffer::DestroyOnGpuThread() {
  CheckSequencedThread();
  gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs();
  command_buffer_.reset();
  // Clean up GL resources if possible.
  bool have_context = context_ && context_->MakeCurrent(surface_);
  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
  context_ = NULL;
  surface_ = NULL;

  g_all_shared_contexts.Pointer()->erase(this);
  return true;
}

void InProcessCommandBuffer::CheckSequencedThread() {
  DCHECK(!sequence_checker_ ||
         sequence_checker_->CalledOnValidSequencedThread());
}

void InProcessCommandBuffer::OnContextLost() {
  CheckSequencedThread();
  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    context_lost_callback_.Reset();
  }

  context_lost_ = true;
  if (share_resources_) {
    for (std::set<InProcessCommandBuffer*>::iterator it =
             g_all_shared_contexts.Get().begin();
         it != g_all_shared_contexts.Get().end();
         ++it) {
      (*it)->context_lost_ = true;
    }
  }
}

CommandBuffer::State InProcessCommandBuffer::GetStateFast() {
  CheckSequencedThread();
  base::AutoLock lock(state_after_last_flush_lock_);
  if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U)
    last_state_ = state_after_last_flush_;
  return last_state_;
}
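
// The subtraction in GetStateFast() is serial-number arithmetic: it stays
// correct when the 32-bit generation counter wraps around. A worked example
// (values chosen for illustration): if last_state_.generation == 0xFFFFFFFE
// and state_after_last_flush_.generation == 0x00000001, the unsigned
// difference is 0x00000003 < 0x80000000U, so the flushed state is correctly
// treated as newer even though its raw generation value is smaller.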

CommandBuffer::State InProcessCommandBuffer::GetState() {
  CheckSequencedThread();
  return GetStateFast();
}

CommandBuffer::State InProcessCommandBuffer::GetLastState() {
  CheckSequencedThread();
  return last_state_;
}

int32 InProcessCommandBuffer::GetLastToken() {
  CheckSequencedThread();
  GetStateFast();
  return last_state_.token;
}

void InProcessCommandBuffer::FlushOnGpuThread(int32 put_offset) {
  CheckSequencedThread();
  ScopedEvent handle_flush(&flush_event_);
  base::AutoLock lock(command_buffer_lock_);
  command_buffer_->Flush(put_offset);
  {
    // Update state before signaling the flush event.
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetState();
  }
  DCHECK((!error::IsError(state_after_last_flush_.error) && !context_lost_) ||
         (error::IsError(state_after_last_flush_.error) && context_lost_));

  // If we've processed all pending commands but still have pending queries,
  // pump idle work until the query is passed.
  if (put_offset == state_after_last_flush_.get_offset &&
      gpu_scheduler_->HasMoreWork()) {
    queue_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork,
                   gpu_thread_weak_ptr_));
  }
}

void InProcessCommandBuffer::ScheduleMoreIdleWork() {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  if (gpu_scheduler_->HasMoreWork()) {
    gpu_scheduler_->PerformIdleWork();
    queue_->ScheduleIdleWork(
        base::Bind(&InProcessCommandBuffer::ScheduleMoreIdleWork,
                   gpu_thread_weak_ptr_));
  }
}

void InProcessCommandBuffer::Flush(int32 put_offset) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread,
                                  gpu_thread_weak_ptr_,
                                  put_offset);
  QueueTask(task);
}

CommandBuffer::State InProcessCommandBuffer::FlushSync(int32 put_offset,
                                                       int32 last_known_get) {
  CheckSequencedThread();
  if (put_offset == last_known_get || last_state_.error != gpu::error::kNoError)
    return last_state_;

  Flush(put_offset);
  GetStateFast();
  while (last_known_get == last_state_.get_offset &&
         last_state_.error == gpu::error::kNoError) {
    flush_event_.Wait();
    GetStateFast();
  }

  return last_state_;
}
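
// Note on the loop above: FlushOnGpuThread() signals |flush_event_| (via
// ScopedEvent) after every flush it processes, so FlushSync() wakes once per
// GPU-side flush, re-reads the state, and returns as soon as get_offset
// moves past |last_known_get| or an error is recorded.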

void InProcessCommandBuffer::SetGetBuffer(int32 shm_id) {
  CheckSequencedThread();
  if (last_state_.error != gpu::error::kNoError)
    return;

  {
    base::AutoLock lock(command_buffer_lock_);
    command_buffer_->SetGetBuffer(shm_id);
    last_put_offset_ = 0;
  }
  {
    base::AutoLock lock(state_after_last_flush_lock_);
    state_after_last_flush_ = command_buffer_->GetState();
  }
}

gpu::Buffer InProcessCommandBuffer::CreateTransferBuffer(size_t size,
                                                         int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return command_buffer_->CreateTransferBuffer(size, id);
}

void InProcessCommandBuffer::DestroyTransferBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task = base::Bind(&CommandBuffer::DestroyTransferBuffer,
                                  base::Unretained(command_buffer_.get()),
                                  id);

  QueueTask(task);
}

gpu::Buffer InProcessCommandBuffer::GetTransferBuffer(int32 id) {
  NOTREACHED();
  return gpu::Buffer();
}

bool InProcessCommandBuffer::SupportsGpuMemoryBuffer() {
  return supports_gpu_memory_buffer_;
}

gfx::GpuMemoryBuffer* InProcessCommandBuffer::CreateGpuMemoryBuffer(
    size_t width,
    size_t height,
    unsigned internalformat,
    int32* id) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return gpu_control_->CreateGpuMemoryBuffer(width,
                                             height,
                                             internalformat,
                                             id);
}

void InProcessCommandBuffer::DestroyGpuMemoryBuffer(int32 id) {
  CheckSequencedThread();
  base::Closure task = base::Bind(&GpuControl::DestroyGpuMemoryBuffer,
                                  base::Unretained(gpu_control_.get()),
                                  id);

  QueueTask(task);
}

bool InProcessCommandBuffer::GenerateMailboxNames(
    unsigned num, std::vector<gpu::Mailbox>* names) {
  CheckSequencedThread();
  base::AutoLock lock(command_buffer_lock_);
  return gpu_control_->GenerateMailboxNames(num, names);
}

uint32 InProcessCommandBuffer::InsertSyncPoint() {
  NOTREACHED();
  return 0;
}

void InProcessCommandBuffer::SignalSyncPoint(unsigned sync_point,
                                             const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(WrapCallback(callback));
}

void InProcessCommandBuffer::SignalQuery(unsigned query,
                                         const base::Closure& callback) {
  CheckSequencedThread();
  QueueTask(base::Bind(&GpuControl::SignalQuery,
                       base::Unretained(gpu_control_.get()),
                       query,
                       WrapCallback(callback)));
}

void InProcessCommandBuffer::SendManagedMemoryStats(
    const gpu::ManagedMemoryStats& stats) {
}

gpu::error::Error InProcessCommandBuffer::GetLastError() {
  CheckSequencedThread();
  return last_state_.error;
}

bool InProcessCommandBuffer::Initialize() {
  NOTREACHED();
  return false;
}

void InProcessCommandBuffer::SetGetOffset(int32 get_offset) { NOTREACHED(); }

void InProcessCommandBuffer::SetToken(int32 token) { NOTREACHED(); }

void InProcessCommandBuffer::SetParseError(gpu::error::Error error) {
  NOTREACHED();
}

void InProcessCommandBuffer::SetContextLostReason(
    gpu::error::ContextLostReason reason) {
  NOTREACHED();
}

namespace {

void PostCallback(const scoped_refptr<base::MessageLoopProxy>& loop,
                  const base::Closure& callback) {
  if (!loop->BelongsToCurrentThread()) {
    loop->PostTask(FROM_HERE, callback);
  } else {
    callback.Run();
  }
}

void RunOnTargetThread(scoped_ptr<base::Closure> callback) {
  DCHECK(callback.get());
  callback->Run();
}

}  // anonymous namespace

base::Closure InProcessCommandBuffer::WrapCallback(
    const base::Closure& callback) {
  // Make sure the callback gets deleted on the target thread by passing
  // ownership.
  scoped_ptr<base::Closure> scoped_callback(new base::Closure(callback));
  base::Closure callback_on_client_thread =
      base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback));
  base::Closure wrapped_callback =
      base::Bind(&PostCallback, base::MessageLoopProxy::current(),
                 callback_on_client_thread);
  return wrapped_callback;
}
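
// A minimal sketch of what WrapCallback guarantees, assuming a client on
// thread A and GPU work on thread B: the wrapped closure can safely be run
// on B, but the original |callback| executes (and is deleted) back on A.
// The handler |OnQueryDone| is hypothetical:
//
//   // On the client thread:
//   buffer->SignalQuery(query_id, base::Bind(&OnQueryDone));
//   // OnQueryDone runs on the client thread even though the query is
//   // signaled from the GPU thread.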

#if defined(OS_ANDROID)
scoped_refptr<gfx::SurfaceTexture>
InProcessCommandBuffer::GetSurfaceTexture(uint32 stream_id) {
  DCHECK(stream_texture_manager_);
  return stream_texture_manager_->GetSurfaceTexture(stream_id);
}
#endif

// static
void InProcessCommandBuffer::EnableVirtualizedContext() {
  g_use_virtualized_gl_context = true;
}

// static
void InProcessCommandBuffer::SetScheduleCallback(
    const base::Closure& callback) {
  DCHECK(!g_uses_explicit_scheduling);
  DCHECK(!SchedulerClientBase::HasClients());
  g_uses_explicit_scheduling = true;
  g_gpu_queue.Get().SetScheduleCallback(callback);
}

// static
void InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread() {
  g_gpu_queue.Get().RunTasks();
}
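
// A minimal sketch of driving explicit scheduling from a hypothetical
// single-threaded embedder (|PostPumpTask| is an assumed helper that posts
// a task to the embedder's own loop):
//
//   // At startup, before any InProcessCommandBuffer is created:
//   InProcessCommandBuffer::SetScheduleCallback(base::Bind(&PostPumpTask));
//   ...
//   // When the posted pump task runs:
//   InProcessCommandBuffer::ProcessGpuWorkOnCurrentThread();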

// static
void InProcessCommandBuffer::SetGpuMemoryBufferFactory(
    GpuMemoryBufferFactory* factory) {
  g_gpu_memory_buffer_factory = factory;
}

}  // namespace gpu